--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ ... @@ hvm_hap_nested_page_fault
     mfn_t mfn;
     struct vcpu *v = current;
     struct p2m_domain *p2m;
-    int rc;
+    int rc, fall_through = 0;
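+    /* fall_through is set when p2m_mem_access_check() promotes the access
+     * rights in place; the fault is then handled by letting the vcpu retry. */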
 
     /* On Nested Virtualization, walk the guest page table.
      * If this succeeds, all is fine.
@@ ... @@ hvm_hap_nested_page_fault
         if ( violation )
         {
-            p2m_mem_access_check(gpa, gla_valid, gla, access_r, access_w, access_x);
-            rc = 1;
-            goto out_put_gfn;
+            if ( p2m_mem_access_check(gpa, gla_valid, gla, access_r,
+                                      access_w, access_x) )
+            {
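+                /* Rights were promoted with no vcpu pause; note it and let
+                 * the checks below run before the access is retried. */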
+                fall_through = 1;
+            }
+            else
+            {
+                /* Rights not promoted, vcpu paused, work here is done */
+                rc = 1;
+                goto out_put_gfn;
+            }
         }
     }
@@ ... @@ hvm_hap_nested_page_fault
         goto out_put_gfn;
     }
 
-    rc = 0;
+    /* If we fell through, the vcpu will retry now that the access
+     * restrictions have been removed. It may fault again if the p2m entry
+     * type still requires it. Otherwise, this is an error condition. */
+    rc = fall_through;
+
 out_put_gfn:
     put_gfn(p2m->domain, gfn);
     return rc;
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ ... @@
     mem_event_unpause_vcpus(d);
 }
 
-void p2m_mem_access_check(unsigned long gpa, bool_t gla_valid, unsigned long gla,
+bool_t p2m_mem_access_check(unsigned long gpa, bool_t gla_valid, unsigned long gla,
                           bool_t access_r, bool_t access_w, bool_t access_x)
 {
     struct vcpu *v = current;
@@ ... @@ p2m_mem_access_check
     {
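+        /* A write fault on an rx2rw page: promote the entry to rw in place.
+         * No event is sent and the vcpu is not paused. */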
         p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2m_access_rw);
         p2m_unlock(p2m);
-        return;
+        return 1;
     }
     p2m_unlock(p2m);
@@ ... @@ p2m_mem_access_check
             p2m_lock(p2m);
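+            /* No listener, and none required: clear the access restrictions
+             * so the vcpu can retry instead of waiting for an event. */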
             p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2m_access_rwx);
             p2m_unlock(p2m);
+            return 1;
         }
 
-        return;
+        return 0;
     }
     else if ( res > 0 )
-        return; /* No space in buffer; VCPU paused */
+        return 0; /* No space in buffer; VCPU paused */
     memset(&req, 0, sizeof(req));
     req.type = MEM_EVENT_TYPE_ACCESS;
@@ ... @@ p2m_mem_access_check
     mem_event_put_request(d, &d->mem_event->access, &req);
 
     /* VCPU paused, mem event request sent */
+    return 0;
 }
 
 void p2m_mem_access_resume(struct domain *d)
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ ... @@
 #ifdef __x86_64__
 /* Send mem event based on the access (gla is -1ull if not available). Handles
- * the rw2rx conversion */
-void p2m_mem_access_check(unsigned long gpa, bool_t gla_valid, unsigned long gla,
+ * the rx2rw conversion. The boolean return value indicates whether access
+ * rights were promoted with no underlying vcpu pause. */
+bool_t p2m_mem_access_check(unsigned long gpa, bool_t gla_valid, unsigned long gla,
                           bool_t access_r, bool_t access_w, bool_t access_x);
 /* Resumes the running of the VCPU, restarting the last instruction */
 void p2m_mem_access_resume(struct domain *d);
@@ ... @@
                        hvmmem_access_t *access);
 
 #else
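+/* Stub for non-x86_64 builds: mem_access is not available, so report the
+ * access as if rights had been promoted and let the fault path fall through. */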
-static inline void p2m_mem_access_check(unsigned long gpa, bool_t gla_valid,
+static inline bool_t p2m_mem_access_check(unsigned long gpa, bool_t gla_valid,
                                         unsigned long gla, bool_t access_r,
                                         bool_t access_w, bool_t access_x)
-{ }
+{ return 1; }
 static inline int p2m_set_mem_access(struct domain *d,
                                      unsigned long start_pfn,
                                      uint32_t nr, hvmmem_access_t access)